
/**
 ** BITCOPY.H
 **
 **  Copyright (C) 1992, Csaba Biegl
 **    820 Stirrup Dr, Nashville, TN, 37221
 **    csaba@vuse.vanderbilt.edu
 **
 **  This file is distributed under the terms listed in the document
 **  "copying.cb", available from the author at the address above.
 **  A copy of "copying.cb" should accompany this file; if not, a copy
 **  should be available from where this file was obtained.  This file
 **  may not be distributed without a verbatim copy of "copying.cb".
 **  You should also have received a copy of the GNU General Public
 **  License along with this program (it is in the file "copying");
 **  if not, write to the Free Software Foundation, Inc., 675 Mass Ave,
 **  Cambridge, MA 02139, USA.
 **
 **  This program is distributed in the hope that it will be useful,
 **  but WITHOUT ANY WARRANTY; without even the implied warranty of
 **  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 **  GNU General Public License for more details.
 **/

#ifndef _BITCOPY_H_
#define _BITCOPY_H_

#ifdef  __TURBOC__
#pragma inline
#endif

/*
 * utilities -- other files may define them too
 */
#ifndef _SaveDS

#ifdef  __TURBOC__
#define _ClrDir()       asm cld
#define _SetDir()       asm std
#define _SaveDS()       asm push ds
#define _RestoreDS()    asm pop  ds
#endif

#ifdef  __GNUC__
#define _ASV            asm volatile
#define _ClrDir()       _ASV("cld")
#define _SetDir()       _ASV("std")
#define _SaveDS()
#define _RestoreDS()
#endif

#endif  /* _SaveDS */
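
/*
 * For readers unfamiliar with these: _ClrDir()/_SetDir() clear or set the
 * x86 direction flag so the string instructions below walk forward or
 * backward, and _SaveDS()/_RestoreDS() preserve DS around the "lds si"
 * loads in the Turbo C macros.  They are empty for GCC, presumably because
 * the 32-bit code below never reloads a segment register.
 */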


#ifdef  __TURBOC__
/*
 * Store a single byte with various logical operations
 *    es:di   - dest (to be incremented)
 *    al      - byte to be written, already shifted if necessary
 */
#define __CPYBYTE__    asm stosb
#define __XORBYTE__    asm xor al,BYTE PTR es:[di]; asm stosb
#define __ORBYTE__     asm or  al,BYTE PTR es:[di]; asm stosb
#define __ANDBYTE__    asm and al,BYTE PTR es:[di]; asm stosb
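
/*
 * For reference only (illustrative C, not compiled): with 'dst' standing
 * for the es:di destination and 'src' for the already-shifted byte in AL,
 * the four macros above behave like
 *
 *    CPY:  *dst++ = src;
 *    XOR:  *dst = src ^ *dst; dst++;
 *    OR:   *dst = src | *dst; dst++;
 *    AND:  *dst = src & *dst; dst++;
 *
 * stosb performing the store and the DI increment in one step.
 */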

/*
 * unshifted byte-sized line copying with various logical operations
 *    es:di   - dest
 *    ds:si   - source
 *    cx      - width
 */
#define __CPYLINE__(label)  asm rep movsb
#define __XORLINE__(label)  label: asm lodsb; __XORBYTE__; asm loop label
#define __ORLINE__(label)   label: asm lodsb; __ORBYTE__;  asm loop label
#define __ANDLINE__(label)  label: asm lodsb; __ANDBYTE__; asm loop label
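
/*
 * Likewise, each looping variant above is roughly (illustrative C)
 *
 *    do { *dst = *src++ OP *dst; dst++; } while(--cnt != 0);
 *
 * with OP one of ^ | &, while __CPYLINE__ is just a "rep movsb".  The
 * looping forms rely on CX being non-zero on entry.
 */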

/*
 * Copy a single byte with edge masking and various logical ops
 *    es:di   - dest (to be incremented)
 *    al      - source byte, shifted if necessary
 *    maskreg - has mask
 */
#define __CPYMASK__(maskreg)                                    \
    asm and    al,maskreg;                                      \
    asm not    maskreg;                                         \
    asm and    maskreg,BYTE PTR es:[di];                        \
    asm or     al,maskreg;                                      \
    asm stosb
#define __XORMASK__(maskreg)                                    \
    asm and    al,maskreg;                                      \
    asm xor    al,BYTE PTR es:[di];                             \
    asm stosb
#define __ORMASK__(maskreg)                                     \
    asm and    al,maskreg;                                      \
    asm or     al,BYTE PTR es:[di];                             \
    asm stosb
#define __ANDMASK__(maskreg)                                    \
    asm and    al,maskreg;                                      \
    asm not    maskreg;                                         \
    asm or     al,maskreg;                                      \
    asm and    al,BYTE PTR es:[di];                             \
    asm stosb
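
/*
 * Informally (illustrative C, not part of the header), the plain-copy
 * masked store is the usual read-modify-write
 *
 *    *dst = (src & mask) | (*dst & ~mask); dst++;
 *
 * so only the bits selected by 'mask' come from the source.  The variants
 * reduce to (src & mask) ^ *dst, (src & mask) | *dst and
 * ((src & mask) | ~mask) & *dst respectively, which again leave the
 * masked-off destination bits untouched in every write mode.
 */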

/*
 * aligned line copying with masking if necessary
 */
#define __CPYMSKLINE__(dst,src,masks,wdt,type) do {             \
    _BX = masks;                                                \
    _CX = wdt;                                                  \
    asm les    di,DWORD PTR dst;                                \
    asm lds    si,DWORD PTR src;                                \
    if(_BL) { asm lodsb; type##MASK__(bl); }                    \
    type##LINE__(LineCopy##type##dst##src##Loop);               \
    if(_BH) { asm lodsb; type##MASK__(bh); }                    \
} while(0)

#define _CopyMskLine(d,s,m,w)        __CPYMSKLINE__(d,s,m,w,__CPY)
#define _CopyMskLineXor(d,s,m,w)     __CPYMSKLINE__(d,s,m,w,__XOR)
#define _CopyMskLineOr(d,s,m,w)      __CPYMSKLINE__(d,s,m,w,__OR)
#define _CopyMskLineAnd(d,s,m,w)     __CPYMSKLINE__(d,s,m,w,__AND)
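
/*
 * A hypothetical caller (names below are illustrative, not defined here):
 * the left-edge mask travels in the low byte of 'masks' (loaded into BL),
 * the right-edge mask in the high byte (BH), and 'w' counts the whole
 * bytes between the two partial edge bytes:
 *
 *    unsigned masks = leftmask | (rightmask << 8);
 *    _CopyMskLine(dstptr, srcptr, masks, w);
 *    _CopyMskLineXor(dstptr, srcptr, masks, w);
 *
 * In this Turbo C branch 'dstptr' and 'srcptr' are far pointers, since the
 * macro loads them with "les di" / "lds si".
 */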

/*
 * edge masking only for aligned line copy
 * NOTE: called either when the width of the middle part is 0 or when the
 * middle part has already been copied through the VGA latches, all four
 * planes at once!
 */
#define __CPYMSKEDGE__(dst,src,masks,wdt,type) do {             \
    _BX = masks;                                                \
    _CX = wdt;                                                  \
    asm les    di,DWORD PTR dst;                                \
    asm lds    si,DWORD PTR src;                                \
    if(_BL) { asm lodsb; type##MASK__(bl); }                    \
    asm add    di,cx;                                           \
    asm add    si,cx;                                           \
    if(_BH) { asm lodsb; type##MASK__(bh); }                    \
} while(0)

#define _CopyMskEdge(d,s,m,w)        __CPYMSKEDGE__(d,s,m,w,__CPY)
#define _CopyMskEdgeXor(d,s,m,w)     __CPYMSKEDGE__(d,s,m,w,__XOR)
#define _CopyMskEdgeOr(d,s,m,w)      __CPYMSKEDGE__(d,s,m,w,__OR)
#define _CopyMskEdgeAnd(d,s,m,w)     __CPYMSKEDGE__(d,s,m,w,__AND)
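
/*
 * Sketched in C-like form (illustrative only, write_masked() standing for
 * the __xxxMASK__ store above): with both edge masks non-zero this does
 *
 *    write_masked(dst, *src, leftmask);      left partial byte
 *    dst += w + 1; src += w + 1;             skip over the middle bytes
 *    write_masked(dst, *src, rightmask);     right partial byte
 *
 * leaving the 'w' middle bytes for the caller (e.g. a VGA latch copy) to
 * transfer by other means.
 */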

/*
 * non-aligned shifted copying (need separate fwd and reverse)
 */
#define __SHIFTLINE__(dst,src,sft,msk,wdt,type,dir) do {        \
    _BX = msk;                                                  \
    _CX = sft;                                                  \
    _DX = wdt;                                                  \
    asm les    di,DWORD PTR dst;                                \
    asm lds    si,DWORD PTR src;                                \
    asm and    ch,1;                                            \
    asm jz     Shift##dir##type##dst##src##NoInit;              \
    asm lodsb;                                                  \
    asm xor    ah,ah;                                           \
    asm ro##dir ax,cl;                                          \
    asm mov    ch,ah;                                           \
    Shift##dir##type##dst##src##NoInit:                         \
    if(_BL) {                                                   \
        asm lodsb;                                              \
        asm xor    ah,ah;                                       \
        asm ro##dir ax,cl;                                      \
        asm or     al,ch;                                       \
        asm mov    ch,ah;                                       \
        type##MASK__(bl);                                       \
    }                                                           \
    if(_DX) {                                                   \
        Shift##dir##type##dst##src##Loop:                       \
        asm lodsb;                                              \
        asm xor    ah,ah;                                       \
        asm ro##dir ax,cl;                                      \
        asm or     al,ch;                                       \
        asm mov    ch,ah;                                       \
        type##BYTE__;                                           \
        asm dec    dx;                                          \
        asm jnz    Shift##dir##type##dst##src##Loop;            \
    }                                                           \
    if(_BH) {                                                   \
        _DX = sft;                                              \
        asm and    dh,2;                                        \
        asm jnz    Shift##dir##type##dst##src##NoLastByte;      \
        asm lodsb;                                              \
        Shift##dir##type##dst##src##NoLastByte:                 \
        asm sh##dir al,cl;                                      \
        asm or     al,ch;                                       \
        type##MASK__(bh);                                       \
    }                                                           \
} while(0)

#define _FwdShiftLine(d,s,sft,m,w)       __SHIFTLINE__(d,s,sft,m,w,__CPY,r)
#define _FwdShiftLineXor(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__XOR,r)
#define _FwdShiftLineOr(d,s,sft,m,w)     __SHIFTLINE__(d,s,sft,m,w,__OR,r)
#define _FwdShiftLineAnd(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__AND,r)

#define _RevShiftLine(d,s,sft,m,w)       __SHIFTLINE__(d,s,sft,m,w,__CPY,l)
#define _RevShiftLineXor(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__XOR,l)
#define _RevShiftLineOr(d,s,sft,m,w)     __SHIFTLINE__(d,s,sft,m,w,__OR,l)
#define _RevShiftLineAnd(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__AND,l)
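
/*
 * Reading the macro above: CL is the bit shift count, bit 0 of the high
 * byte of 'sft' requests an extra source byte up front to prime the carry,
 * and bit 1 suppresses fetching a final source byte before the right edge
 * is written.  Each step widens the source byte to 16 bits and rotates AX
 * so the bits shifted out of AL land in AH, which becomes the carry for
 * the next byte.  For the forward ("ror") direction this is roughly
 * (illustrative C):
 *
 *    out   = (*src >> shift) | carry;
 *    carry = (*src++ << (8 - shift)) & 0xff;
 *    *dst++ = out;                        (or XOR/OR/AND it into *dst)
 *
 * The reverse variants use rol/shl, i.e. they move the bits the other way.
 */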
#endif  /* __TURBOC__ */

#ifdef  __GNUC__
/*
 * Store a single byte with various logical operations
 *    edi     - dest (to be incremented)
 *    al      - byte to be written, already shifted if necessary
 */
#define __CPYBYTE__    "stosb"
#define __XORBYTE__    "xorb (%%edi),%%al; stosb"
#define __ORBYTE__     "orb  (%%edi),%%al; stosb"
#define __ANDBYTE__    "andb (%%edi),%%al; stosb"

/*
 * unshifted byte-sized line copying with various logical operations
 *    edi     - dest
 *    esi     - source
 *    ecx     - width
 */
#define __CPYLINE__(label)  #label": rep; movsb"
#define __XORLINE__(label)  #label": lodsb; "__XORBYTE__" ; loop "#label
#define __ORLINE__(label)   #label": lodsb; "__ORBYTE__ " ; loop "#label
#define __ANDLINE__(label)  #label": lodsb; "__ANDBYTE__" ; loop "#label

/*
 * Copy a single byte with edge masking and various logical ops
 *    edi     - dest (to be incremented)
 *    al      - source byte, shifted if necessary
 *    maskreg - has mask
 */
#define __CPYMASK__(MS) \
    "andb "#MS",%%al; notb "#MS"; andb (%%edi),"#MS"; orb "#MS",%%al; stosb"
#define __XORMASK__(MS) \
    "andb "#MS",%%al; xorb (%%edi),%%al; stosb"
#define __ORMASK__(MS) \
    "andb "#MS",%%al; orb (%%edi),%%al; stosb"
#define __ANDMASK__(MS) \
    "andb "#MS",%%al; notb "#MS"; orb "#MS",%%al; andb (%%edi),%%al; stosb"

/*
 * aligned line copying with masking if necessary
 * NOTE: does not check for zero middle part width!!!
 */
#define __CPYMSKLINE__(dst,src,masks,wdt,type) _ASV("             \n\
    movl    %0,%%edi                                              \n\
    movl    %1,%%esi                                              \n\
    movl    %2,%%ebx                                              \n\
    movl    %3,%%ecx                                              \n\
    orb     %%bl,%%bl                                             \n\
    jz      L_CpyLine"#type"BodyLoop                              \n\
    lodsb                                                         \n\
    "type##MASK__(%%bl)"                                          \n\
    "type##LINE__(L_CpyLine##type##BodyLoop)"                     \n\
    orb     %%bh,%%bh                                             \n\
    jz      L_CpyLine"#type"NoLastMask                            \n\
    lodsb                                                         \n\
    "type##MASK__(%%bh)"                                          \n\
L_CpyLine"#type"NoLastMask:                                       "\
    : /* NOTHING */                                               \
    : "g" (dst), "g" (src), "g" (masks), "g" (wdt)                \
    : "di", "si", "cx", "bx", "ax"                                \
)

#define _CopyMskLine(d,s,m,w)        __CPYMSKLINE__(d,s,m,w,__CPY)
#define _CopyMskLineXor(d,s,m,w)     __CPYMSKLINE__(d,s,m,w,__XOR)
#define _CopyMskLineOr(d,s,m,w)      __CPYMSKLINE__(d,s,m,w,__OR)
#define _CopyMskLineAnd(d,s,m,w)     __CPYMSKLINE__(d,s,m,w,__AND)
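
/*
 * Usage mirrors the Turbo C branch, except 'dst' and 'src' are plain
 * 32-bit pointers here (moved straight into edi/esi).  A hypothetical call
 * (names illustrative only):
 *
 *    _CopyMskLineOr(dstptr, srcptr, leftmask | (rightmask << 8), w);
 *
 * An edge byte is only touched when its mask byte is non-zero, so passing
 * 0 for either edge mask skips that partial byte entirely.
 */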

/*
 * edge masking only for aligned line copy
 * NOTE: called either when the width of the middle part is 0 or when the
 * middle part has already been copied through the VGA latches, all four
 * planes at once!
 */
#define __CPYMSKEDGE__(dst,src,masks,wdt,type) _ASV("             \n\
    movl    %0,%%edi                                              \n\
    movl    %1,%%esi                                              \n\
    movl    %2,%%ebx                                              \n\
    orb     %%bl,%%bl                                             \n\
    jz      L_CpyEdge"#type"SkipBody                              \n\
    lodsb                                                         \n\
    "type##MASK__(%%bl)"                                          \n\
L_CpyEdge"#type"SkipBody:                                         \n\
    addl    %3,%%edi                                              \n\
    addl    %3,%%esi                                              \n\
    orb     %%bh,%%bh                                             \n\
    jz      L_CpyEdge"#type"NoLastMask                            \n\
    lodsb                                                         \n\
    "type##MASK__(%%bh)"                                          \n\
L_CpyEdge"#type"NoLastMask:                                       "\
    : /* NOTHING */                                               \
    : "g" (dst), "g" (src), "g" (masks), "g" (wdt)                \
    : "di", "si", "bx", "ax"                                      \
)

#define _CopyMskEdge(d,s,m,w)        __CPYMSKEDGE__(d,s,m,w,__CPY)
#define _CopyMskEdgeXor(d,s,m,w)     __CPYMSKEDGE__(d,s,m,w,__XOR)
#define _CopyMskEdgeOr(d,s,m,w)      __CPYMSKEDGE__(d,s,m,w,__OR)
#define _CopyMskEdgeAnd(d,s,m,w)     __CPYMSKEDGE__(d,s,m,w,__AND)

/*
 * non-aligned shifted copying (need separate fwd and reverse)
 */
#define __SHIFTLINE__(dst,src,sft,msk,wdt,type,dir) _ASV("        \n\
    movl    %0,%%edi                                              \n\
    movl    %1,%%esi                                              \n\
    movl    %2,%%ecx                                              \n\
    movl    %3,%%ebx                                              \n\
    movl    %4,%%edx                                              \n\
    andb    $1,%%ch                                               \n\
    jz      L_SftLine"#dir#type"ChkMask1                          \n\
    lodsb                                                         \n\
    xorb    %%ah,%%ah                                             \n\
    ro"#dir"w   %%cl,%%ax                                         \n\
    movb    %%ah,%%ch                                             \n\
L_SftLine"#dir#type"ChkMask1:                                     \n\
    orb     %%bl,%%bl                                             \n\
    jz      L_SftLine"#dir#type"ChkWidth                          \n\
    lodsb                                                         \n\
    xorb    %%ah,%%ah                                             \n\
    ro"#dir"w   %%cl,%%ax                                         \n\
    orb     %%ch,%%al                                             \n\
    movb    %%ah,%%ch                                             \n\
    "type##MASK__(%%bl)"                                          \n\
L_SftLine"#dir#type"ChkWidth:                                     \n\
    orl     %%edx,%%edx                                           \n\
    jz      L_SftLine"#dir#type"ChkMask2                          \n\
L_SftLine"#dir#type"BodyLoop:                                     \n\
    lodsb                                                         \n\
    xorb    %%ah,%%ah                                             \n\
    ro"#dir"w   %%cl,%%ax                                         \n\
    orb     %%ch,%%al                                             \n\
    movb    %%ah,%%ch                                             \n\
    "type##BYTE__"                                                \n\
    decl    %%edx                                                 \n\
    jnz     L_SftLine"#dir#type"BodyLoop                          \n\
L_SftLine"#dir#type"ChkMask2:                                     \n\
    orb     %%bh,%%bh                                             \n\
    jz      L_SftLine"#dir#type"EndLine                           \n\
    movl    %2,%%edx                                              \n\
    andb    $2,%%dh                                               \n\
    jnz     L_SftLine"#dir#type"NoLastByte                        \n\
    lodsb                                                         \n\
L_SftLine"#dir#type"NoLastByte:                                   \n\
    sh"#dir"b   %%cl,%%al                                         \n\
    orb     %%ch,%%al                                             \n\
    "type##MASK__(%%bh)"                                          \n\
L_SftLine"#dir#type"EndLine:                                      "\
    : /* NOTHING */                                               \
    : "g" (dst), "g" (src), "g" (sft), "g" (msk), "g" (wdt)       \
    : "di", "si", "dx", "cx", "bx", "ax"                          \
)

#define _FwdShiftLine(d,s,sft,m,w)       __SHIFTLINE__(d,s,sft,m,w,__CPY,r)
#define _FwdShiftLineXor(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__XOR,r)
#define _FwdShiftLineOr(d,s,sft,m,w)     __SHIFTLINE__(d,s,sft,m,w,__OR,r)
#define _FwdShiftLineAnd(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__AND,r)

#define _RevShiftLine(d,s,sft,m,w)       __SHIFTLINE__(d,s,sft,m,w,__CPY,l)
#define _RevShiftLineXor(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__XOR,l)
#define _RevShiftLineOr(d,s,sft,m,w)     __SHIFTLINE__(d,s,sft,m,w,__OR,l)
#define _RevShiftLineAnd(d,s,sft,m,w)    __SHIFTLINE__(d,s,sft,m,w,__AND,l)
#endif  /* __GNUC__ */

#endif  /* whole file */
  362.